{
int cpu = smp_processor_id();
struct tss_struct *t = &init_tss[cpu];
- char gdt_load[10];
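+ /* GDTR image for this CPU: the base is offset so Xen's reserved descriptors land at entry FIRST_RESERVED_GDT_ENTRY. */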
+ struct desc_ptr gdt_desc = {
+ .base = (unsigned long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY),
+ .limit = LAST_RESERVED_GDT_BYTE
+ };
if (cpu_test_and_set(cpu, cpu_initialized)) {
printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
/* Install correct page table. */
write_ptbase(current);
- *(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
- *(unsigned long *)(&gdt_load[2]) = GDT_VIRT_START(current);
- asm volatile ( "lgdt %0" : "=m" (gdt_load) );
+ asm volatile ( "lgdt %0" : : "m" (gdt_desc) );
/* No nested task. */
asm volatile ("pushf ; andw $0xbfff,(%"__OP"sp) ; popf" );
if ( is_idle_domain(d) )
{
v->arch.schedule_tail = continue_idle_domain;
- if ( v->vcpu_id )
- v->arch.cr3 = d->vcpu[0]->arch.cr3;
- else if ( !*idle_vcpu )
- v->arch.cr3 = __pa(idle_pg_table);
- else if ( !(v->arch.cr3 = clone_idle_pagetable(v)) )
- return -ENOMEM;
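+ /* All idle vCPUs share the master idle page table. */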
+ v->arch.cr3 = __pa(idle_pg_table);
}
v->arch.guest_context.ctrlreg[4] =
}
}
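+/* Only non-idle PV vCPUs need the full per-domain GDT mapping; HVM and idle vCPUs run on the plain per-CPU GDT. */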
+static inline int need_full_gdt(struct vcpu *v)
+{
+ return (!is_hvm_vcpu(v) && !is_idle_vcpu(v));
+}
+
static void __context_switch(void)
{
struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
- unsigned int i, cpu = smp_processor_id();
+ unsigned int cpu = smp_processor_id();
struct vcpu *p = per_cpu(curr_vcpu, cpu);
struct vcpu *n = current;
struct desc_struct *gdt;
- struct page_info *page;
struct desc_ptr gdt_desc;
ASSERT(p != n);
gdt = !is_pv_32on64_vcpu(n) ? per_cpu(gdt_table, cpu) :
per_cpu(compat_gdt_table, cpu);
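+ /* If n needs the full GDT, map this CPU's reserved GDT frames into n's per-domain GDT area. */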
- page = virt_to_page(gdt);
- for (i = 0; i < NR_RESERVED_GDT_PAGES; ++i)
-     l1e_write(n->domain->arch.mm_perdomain_pt +
-               (n->vcpu_id << GDT_LDT_VCPU_SHIFT) +
-               FIRST_RESERVED_GDT_PAGE + i,
-               l1e_from_page(page + i, __PAGE_HYPERVISOR));
+ if ( need_full_gdt(n) )
+ {
+     struct page_info *page = virt_to_page(gdt);
+     unsigned int i;
+     for ( i = 0; i < NR_RESERVED_GDT_PAGES; i++ )
+         l1e_write(n->domain->arch.mm_perdomain_pt +
+                   (n->vcpu_id << GDT_LDT_VCPU_SHIFT) +
+                   FIRST_RESERVED_GDT_PAGE + i,
+                   l1e_from_page(page + i, __PAGE_HYPERVISOR));
+ }
- if ( p->vcpu_id != n->vcpu_id )
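+ /* p has been running on its per-domain GDT mapping and n will not provide the same mapping: revert to the per-CPU GDT before switching page tables. */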
+ if ( need_full_gdt(p) &&
+ ((p->vcpu_id != n->vcpu_id) || !need_full_gdt(n)) )
{
gdt_desc.limit = LAST_RESERVED_GDT_BYTE;
gdt_desc.base = (unsigned long)(gdt - FIRST_RESERVED_GDT_ENTRY);
asm volatile ( "lgdt %0" : : "m" (gdt_desc) );
}
write_ptbase(n);
- if ( p->vcpu_id != n->vcpu_id )
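+ /* With n's page tables in place, point GDTR at n's per-domain GDT mapping unless that exact mapping is already loaded. */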
+ if ( need_full_gdt(n) &&
+ ((p->vcpu_id != n->vcpu_id) || !need_full_gdt(p)) )
{
+ gdt_desc.limit = LAST_RESERVED_GDT_BYTE;
gdt_desc.base = GDT_VIRT_START(n);
asm volatile ( "lgdt %0" : : "m" (gdt_desc) );
}
}
}
-/* We run on dom0's page tables for the final part of the build process. */
-static void dom0_pt_enter(struct vcpu *v)
-{
- struct desc_ptr gdt_desc = {
- .limit = LAST_RESERVED_GDT_BYTE,
- .base = (unsigned long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY)
- };
-
- asm volatile ( "lgdt %0" : : "m" (gdt_desc) );
- write_ptbase(v);
-}
-
-/* Return to idle domain's page tables. */
-static void dom0_pt_exit(void)
-{
- struct desc_ptr gdt_desc = {
- .limit = LAST_RESERVED_GDT_BYTE,
- .base = GDT_VIRT_START(current)
- };
-
- write_ptbase(current);
- asm volatile ( "lgdt %0" : : "m" (gdt_desc) );
-}
-
int __init construct_dom0(
struct domain *d,
unsigned long _image_start, unsigned long image_len,
else
update_cr3(v);
- dom0_pt_enter(v);
+ /* We run on dom0's page tables for the final part of the build process. */
+ write_ptbase(v);
/* Copy the OS image and free temporary buffer. */
elf.dest = (void*)vkern_start;
(parms.virt_hypercall >= v_end) )
{
write_ptbase(current);
- local_irq_enable();
printk("Invalid HYPERCALL_PAGE field in ELF notes.\n");
return -1;
}
- hypercall_page_initialise(d, (void *)(unsigned long)parms.virt_hypercall);
+ hypercall_page_initialise(
+ d, (void *)(unsigned long)parms.virt_hypercall);
}
/* Copy the initial ramdisk. */
xlat_start_info(si, XLAT_start_info_console_dom0);
#endif
- dom0_pt_exit();
+ /* Return to idle domain's page tables. */
+ write_ptbase(current);
#if defined(__i386__)
/* Destroy low mappings - they were only for our convenience. */
{
unsigned int cpu = smp_processor_id();
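+ /* On VM exit the host runs on this CPU's GDT, with the base offset as in cpu_init(). */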
+ __vmwrite(HOST_GDTR_BASE,
+ (unsigned long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY));
__vmwrite(HOST_IDTR_BASE, (unsigned long)idt_tables[cpu]);
__vmwrite(HOST_TR_SELECTOR, TSS_ENTRY << 3);
__vmwrite(IO_BITMAP_A, virt_to_maddr((char *)hvm_io_bitmap + 0));
__vmwrite(IO_BITMAP_B, virt_to_maddr((char *)hvm_io_bitmap + PAGE_SIZE));
- /* Host GDTR base. */
- __vmwrite(HOST_GDTR_BASE, GDT_VIRT_START(v));
-
/* Host data selectors. */
__vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
__vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
static void __init init_idle_domain(void)
{
struct domain *idle_domain;
- unsigned int i;
/* Domain creation requires that scheduler structures are initialised. */
scheduler_init();
idle_vcpu[0] = this_cpu(curr_vcpu) = current;
setup_idle_pagetable();
-
- for (i = 0; i < NR_RESERVED_GDT_PAGES; ++i)
- idle_domain->arch.mm_perdomain_pt[FIRST_RESERVED_GDT_PAGE + i] =
- l1e_from_page(virt_to_page(boot_cpu_gdt_table) + i,
- __PAGE_HYPERVISOR);
-
}
static void __init srat_detect_node(int cpu)
parse_video_info();
set_current((struct vcpu *)0xfffff000); /* debug sanity */
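+ /* Keep idle_vcpu[0] in step with 'current' during early boot; it is set properly again in init_idle_domain(). */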
+ idle_vcpu[0] = current;
set_processor_id(0); /* needed early, for smp_processor_id() */
if ( cpu_has_efer )
rdmsrl(MSR_EFER, this_cpu(efer));
*/
{
unsigned long boot_error;
- unsigned int i;
+ unsigned int order;
int timeout;
unsigned long start_eip;
unsigned short nmi_high = 0, nmi_low = 0;
gdt = per_cpu(gdt_table, cpu);
if (gdt == boot_cpu_gdt_table) {
- i = get_order_from_pages(NR_RESERVED_GDT_PAGES);
+ order = get_order_from_pages(NR_RESERVED_GDT_PAGES);
#ifdef __x86_64__
#ifdef CONFIG_COMPAT
- page = alloc_domheap_pages(NULL, i,
+ page = alloc_domheap_pages(NULL, order,
MEMF_node(cpu_to_node(cpu)));
per_cpu(compat_gdt_table, cpu) = gdt = page_to_virt(page);
memcpy(gdt, boot_cpu_compat_gdt_table,
NR_RESERVED_GDT_PAGES * PAGE_SIZE);
gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
#endif
- page = alloc_domheap_pages(NULL, i,
+ page = alloc_domheap_pages(NULL, order,
MEMF_node(cpu_to_node(cpu)));
per_cpu(gdt_table, cpu) = gdt = page_to_virt(page);
#else
- per_cpu(gdt_table, cpu) = gdt = alloc_xenheap_pages(i);
+ per_cpu(gdt_table, cpu) = gdt = alloc_xenheap_pages(order);
#endif
memcpy(gdt, boot_cpu_gdt_table,
NR_RESERVED_GDT_PAGES * PAGE_SIZE);
gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
}
- for (i = 0; i < NR_RESERVED_GDT_PAGES; ++i)
- v->domain->arch.mm_perdomain_pt
- [(v->vcpu_id << GDT_LDT_VCPU_SHIFT) +
- FIRST_RESERVED_GDT_PAGE + i]
- = l1e_from_page(virt_to_page(gdt) + i,
- __PAGE_HYPERVISOR);
-
#ifdef __i386__
if (!per_cpu(doublefault_tss, cpu)) {
per_cpu(doublefault_tss, cpu) = alloc_xenheap_page();
__PAGE_HYPERVISOR));
}
-unsigned long clone_idle_pagetable(struct vcpu *v)
-{
- unsigned int i;
- struct domain *d = v->domain;
- l3_pgentry_t *l3_table = v->arch.pae_l3_cache.table[0];
- l2_pgentry_t *l2_table = alloc_xenheap_page();
-
- if ( !l2_table )
- return 0;
-
- memcpy(l3_table, idle_pg_table, L3_PAGETABLE_ENTRIES * sizeof(*l3_table));
- l3_table[l3_table_offset(PERDOMAIN_VIRT_START)] =
- l3e_from_page(virt_to_page(l2_table), _PAGE_PRESENT);
-
- copy_page(l2_table, idle_pg_table_l2 +
- l3_table_offset(PERDOMAIN_VIRT_START) * L2_PAGETABLE_ENTRIES);
- for ( i = 0; i < PDPT_L2_ENTRIES; ++i )
- l2_table[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
- l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt) + i,
- __PAGE_HYPERVISOR);
-
- return __pa(l3_table);
-}
-
void __init zap_low_mappings(l2_pgentry_t *dom0_l2)
{
int i;
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/mm.h>
-#include <xen/numa.h>
#include <xen/sched.h>
#include <xen/guest_access.h>
#include <asm/current.h>
__PAGE_HYPERVISOR));
}
-unsigned long clone_idle_pagetable(struct vcpu *v)
-{
- struct domain *d = v->domain;
- struct page_info *page = alloc_domheap_page(NULL,
- MEMF_node(vcpu_to_node(v)));
- l4_pgentry_t *l4_table = page_to_virt(page);
-
- if ( !page )
- return 0;
-
- copy_page(l4_table, idle_pg_table);
- l4_table[l4_table_offset(PERDOMAIN_VIRT_START)] =
- l4e_from_page(virt_to_page(d->arch.mm_perdomain_l3),
- __PAGE_HYPERVISOR);
-
- return __pa(l4_table);
-}
-
void __init zap_low_mappings(void)
{
BUG_ON(num_online_cpus() != 1);
#endif
void paging_init(void);
void setup_idle_pagetable(void);
-unsigned long clone_idle_pagetable(struct vcpu *);
#endif /* !defined(__ASSEMBLY__) */
#define _PAGE_PRESENT 0x001U